# Attach the required packages.  library() stops with an error when a package
# is missing, which doubles as the installation check.
packages <- c("plotly", "dplyr")
invisible(lapply(packages, library, character.only = TRUE))
package ‘plotly’ was built under R version 4.0.5
Loading required package: ggplot2
package ‘ggplot2’ was built under R version 4.0.3
Registered S3 method overwritten by 'data.table':
  method           from
  print.data.table
Registered S3 methods overwritten by 'htmltools':
  method               from
  print.html           tools:rstudio
  print.shiny.tag      tools:rstudio
  print.shiny.tag.list tools:rstudio
Registered S3 method overwritten by 'htmlwidgets':
  method           from
  print.htmlwidget tools:rstudio
Attaching package: ‘plotly’
The following object is masked from ‘package:ggplot2’:
    last_plot
The following object is masked from ‘package:stats’:
    filter
The following object is masked from ‘package:graphics’:
    layout
package ‘dplyr’ was built under R version 4.0.4
Attaching package: ‘dplyr’
The following objects are masked from ‘package:stats’:
    filter, lag
The following objects are masked from ‘package:base’:
    intersect, setdiff, setequal, union
# Result directories: one per Viking run of the pool-community-scaling code.
dirViking <- file.path(
  getwd(),
  c(
    "LCAB_LawMorton1996-NumericalPoolCommunityScaling",
    "LCAB_LawMorton1996-NumericalPoolCommunityScaling2"
  )
)
dirVikingResults <- file.path(
  dirViking, c("results-2021-04", "save-2021-05-10") # Latter not 100% yet.
)
# Result file name pattern: "run-<CombnNum>-<RunSeed>.RDS", where CombnNum is
# the combination number (integer) and RunSeed the per-run seed (string).
resultFormat <- "run-%d-%s.RDS"
# Copied from LawMorton1996-NumericalPoolCommunityScaling-Calculation.R
# TODO: In the future, make this a separate file that everyone can call...
set.seed(38427042)
basal <- c(3, 10, 30, 100, 300, 1000)          # basal species pool sizes
consumer <- 2 * c(3, 10, 30, 100, 300, 1000)   # consumer pools: twice basal
events <- 2 * (max(basal) + max(consumer))     # assembly events per run
runs <- 100                                    # replicate runs per combination
logBodySize <- c(-2, -1, -1, 1) # Morton and Law 1997 version.
parameters <- c(0.01, 10, 0.5, 0.2, 100, 0.1)
# Need to rerun seedsPrep to get the random number generation right for seedsRun
seedsPrep <- 1E8 * runif(2 * length(basal) * length(consumer))
seedsRun <- 1E8 * runif(runs * length(basal) * length(consumer))
# Build the bookkeeping frame for the first (2021-04) dataset: one row per
# basal x consumer combination, carrying its seeds and, later, its results.
paramFrame <- with(list(
  b = rep(basal, times = length(consumer)),   # basal pool size per combination
  c = rep(consumer, each = length(basal)),    # consumer pool size per combination
  s1 = seedsPrep[1:(length(basal) * length(consumer))],  # pool seeds
  s2 = seedsPrep[
    (length(basal) * length(consumer) + 1):(
      2 * length(basal) * length(consumer))
  ],                                          # interaction-matrix seeds
  sR = seedsRun                               # per-run seeds, `runs` per combination
), {
  temp <- data.frame(
    CombnNum = 0,              # filled below
    Basals = b,
    Consumers = c,
    SeedPool = s1,
    SeedMat = s2,
    SeedRuns = "",             # CSV of per-run seeds, filled below
    SeedRunsNum = 0,
    # List columns; populated by the result-collection loop further down.
    EndStates = I(rep(list(""), length(b))),
    EndStatesNum = 0,
    EndStateSizes = I(rep(list(""), length(b))),
    EndStateSizesNum = NA,
    EndStateAssembly = I(rep(list(""), length(b))),
    EndStateAbundance = I(rep(list(""), length(b))),
    Dataset = "2021-04",
    DatasetID = 1,
    stringsAsFactors = FALSE
  )
  # seq_len() instead of 1:nrow() so an empty frame cannot yield c(1, 0).
  for (i in seq_len(nrow(temp))) {
    seeds <- sR[((i - 1) * runs + 1) : (i * runs)]
    temp$SeedRuns[i] <- toString(seeds) # CSV
    temp$SeedRunsNum[i] <- length(seeds)
  }
  temp$CombnNum <- seq_len(nrow(temp))
  temp
})
# Note: n + 2 end states. Failure to finish, failure to obtain state, and state.
# Collect the end states for every combination of the first dataset by reading
# each run's saved file and tallying the distinct final communities.
for (i in seq_len(nrow(paramFrame))) {
  resultsList <- list(       # community (CSV string) -> frequency
    "No Run" = 0,            # result file missing
    "No State" = 0           # file present but no final state recorded
  )
  resultsSize <- list(       # community size (as string) -> frequency
    "0" = 0
  )
  resultsAssembly <- list(   # first observed assembly record per community
    "No Run" = data.frame(),
    "No State" = data.frame()
  )
  seeds <- unlist(strsplit(paramFrame$SeedRuns[i], ', '))
  for (seed in seeds) {
    fileName <- file.path(
      dirVikingResults[paramFrame$DatasetID[i]],
      sprintf(resultFormat, paramFrame$CombnNum[i], seed)
    )
    if (file.exists(fileName)) {
      # load() returns the restored object name(s), which are then evaluated.
      # NOTE(review): assumes each file holds a single object -- confirm.
      temp <- load(fileName)
      temp <- eval(parse(text = temp)) # Get objects.
      if (is.data.frame(temp)) {
        # Final community: last row of the last (list) column.
        community <- toString(
          temp[[ncol(temp)]][[nrow(temp)]]
        )
        size <- toString(length(temp[[ncol(temp)]][[nrow(temp)]]))
        if (community == "") {
          resultsList$`No State` <- resultsList$`No State` + 1
          resultsSize$`0` <- resultsSize$`0` + 1
        } else if (community %in% names(resultsList)) {
          resultsList[[community]] <- resultsList[[community]] + 1
          resultsSize[[size]] <- resultsSize[[size]] + 1
        } else {
          resultsList[[community]] <- 1
          resultsAssembly[[community]] <- temp
          # BUG FIX: compare against the list's *names*. The original
          # `size %in% resultsSize` tested the size string against the stored
          # counts, so a new size whose string matched an existing count would
          # be "incremented" from a nonexistent entry.
          if (size %in% names(resultsSize)) {
            resultsSize[[size]] <- resultsSize[[size]] + 1
          } else {
            resultsSize[[size]] <- 1
          }
        }
      } else {
        resultsList$`No State` <- resultsList$`No State` + 1
        resultsSize$`0` <- resultsSize$`0` + 1
      }
    } else {
      resultsList$`No Run` <- resultsList$`No Run` + 1
      resultsSize$`0` <- resultsSize$`0` + 1
    }
  }
  paramFrame$EndStates[[i]] <- resultsList
  paramFrame$EndStatesNum[i] <- length(resultsList) - 2 # ! No State, No Run
  paramFrame$EndStateSizes[[i]] <- resultsSize
  paramFrame$EndStateSizesNum[i] <- length(resultsSize) - 1 # ! 0
  paramFrame$EndStateAssembly[[i]] <- resultsAssembly
}
# Load the settings for the second dataset (2021-05); this re-creates the
# shared parameters (seed vectors etc.) before the second half of paramFrame
# is built below.
source(
file.path(getwd(),
"LawMorton1996-NumericalPoolCommunityScaling-Settings2.R")
)
# Remember where the first dataset's rows end so the second collection loop
# below can start after them.
oldNrow <- nrow(paramFrame)
# Append one row per basal x consumer combination for the second dataset.
# NOTE(review): duplicates the construction at the top of the file; presumably
# the sourced settings file redefined the seed vectors -- confirm.
paramFrame <- rbind(paramFrame, with(list(
b = rep(basal, times = length(consumer)),
c = rep(consumer, each = length(basal)),
s1 = seedsPrep[1:(length(basal) * length(consumer))],
s2 = seedsPrep[
(length(basal) * length(consumer) + 1):(
2 * length(basal) * length(consumer))
],
sR = seedsRun
), {
# Skeleton data frame: one row per (Basals, Consumers) combination.
temp <- data.frame(
CombnNum = 0,
Basals = b,
Consumers = c,
SeedPool = s1,
SeedMat = s2,
SeedRuns = "",
SeedRunsNum = 0,
EndStates = I(rep(list(""), length(b))),
EndStatesNum = 0,
EndStateSizes = I(rep(list(""), length(b))),
EndStateSizesNum = NA,
EndStateAssembly = I(rep(list(""), length(b))),
EndStateAbundance = I(rep(list(""), length(b))),
Dataset = "2021-05",
DatasetID = 2,
stringsAsFactors = FALSE
)
# Attach each combination's block of per-run seeds as a CSV string.
for (i in 1:nrow(temp)) {
seeds <- sR[((i - 1) * runs + 1) : (i * runs)]
temp$SeedRuns[i] <- toString(seeds) # CSV
temp$SeedRunsNum[i] <- length(seeds)
}
temp$CombnNum <- 1:nrow(temp)
temp
})
)
# Note: n + 2 end states. Failure to finish, failure to obtain state, and state.
# Modified from above, but with the abundance recorded.
for (i in (oldNrow + 1):nrow(paramFrame)) {
  resultsList <- list(       # community (CSV string) -> frequency
    "No Run" = 0,
    "No State" = 0
  )
  resultsSize <- list(       # community size (as string) -> frequency
    "0" = 0
  )
  resultsAssembly <- list(   # first observed assembly record per community
    "No Run" = data.frame(),
    "No State" = data.frame()
  )
  resultsAbund <- list(      # final abundances (CSV string) per community
    "No Run" = "",
    "No State" = ""
  )
  seeds <- unlist(strsplit(paramFrame$SeedRuns[i], ', '))
  for (seed in seeds) {
    fileName <- file.path(
      dirVikingResults[paramFrame$DatasetID[i]],
      sprintf(resultFormat, paramFrame$CombnNum[i], seed)
    )
    if (file.exists(fileName)) {
      temp <- load(fileName)
      temp <- eval(parse(text = temp)) # Get objects.
      if (is.list(temp) && "Result" %in% names(temp)) {
        if (is.data.frame(temp$Result))
          community <- temp$Result$Community[[nrow(temp$Result)]]
        else
          community <- temp$Result
        size <- toString(length(community))
        # community appears to hold 0-based species indices here, hence the
        # + 1 when indexing Abund -- TODO confirm against the run scripts.
        if (community[1] != "")
          abund <- toString(temp$Abund[community + 1])
        else
          abund <- ""
        community <- toString(community)
        if (community == "") {
          resultsList$`No State` <- resultsList$`No State` + 1
          resultsSize$`0` <- resultsSize$`0` + 1
        } else if (community %in% names(resultsList)) {
          resultsList[[community]] <- resultsList[[community]] + 1
          resultsSize[[size]] <- resultsSize[[size]] + 1
        } else {
          resultsList[[community]] <- 1
          resultsAssembly[[community]] <- temp
          resultsAbund[[community]] <- abund
          # BUG FIX: compare against the list's *names*; the original tested
          # the size string against the stored counts.
          if (size %in% names(resultsSize)) {
            resultsSize[[size]] <- resultsSize[[size]] + 1
          } else {
            resultsSize[[size]] <- 1
          }
        }
      } else {
        resultsList$`No State` <- resultsList$`No State` + 1
        resultsSize$`0` <- resultsSize$`0` + 1
      }
    } else {
      resultsList$`No Run` <- resultsList$`No Run` + 1
      resultsSize$`0` <- resultsSize$`0` + 1
    }
  }
  paramFrame$EndStates[[i]] <- resultsList
  paramFrame$EndStatesNum[i] <- length(resultsList) - 2 # ! No State, No Run
  paramFrame$EndStateSizes[[i]] <- resultsSize
  paramFrame$EndStateSizesNum[i] <- length(resultsSize) - 1 # ! 0
  paramFrame$EndStateAssembly[[i]] <- resultsAssembly
  paramFrame$EndStateAbundance[[i]] <- resultsAbund
}
# X, Y: Basal and Consumer pool sizes; Z: sizes of the end states.
# One row per distinct end state (paramFrame$EndStatesNum per combination).
plotScalingData <- data.frame(
  CombnNum = rep(paramFrame$CombnNum, paramFrame$EndStatesNum),
  Basals = rep(paramFrame$Basals, paramFrame$EndStatesNum),
  Consumers = rep(paramFrame$Consumers, paramFrame$EndStatesNum),
  Dataset = rep(paramFrame$Dataset, paramFrame$EndStatesNum),
  DatasetID = rep(paramFrame$DatasetID, paramFrame$EndStatesNum)
)
# Flatten the per-combination end-state bookkeeping, then drop the two
# sentinel entries from all three vectors with one shared mask.
comms <- unlist(lapply(paramFrame$EndStates, names))
freqs <- unlist(paramFrame$EndStates)
asmbl <- unlist(paramFrame$EndStateAssembly, recursive = FALSE)
keep <- comms != "No Run" & comms != "No State"
asmbl <- asmbl[keep]
freqs <- freqs[keep]
comms <- comms[keep]
# Keep only the assembly steps that actually changed the community.
asmbl <- lapply(asmbl, function(d) {
  if ("Result.Outcome" %in% names(d)) {
    d %>% dplyr::filter(Result.Outcome != "Type 1 (Failure)" &
                          Result.Outcome != "Present")
  } else {
    d$Result %>% dplyr::filter(Outcome != "Type 1 (Failure)" &
                                 Outcome != "Present")
  }
})
plotScalingData$Communities <- comms
plotScalingData$CommunityFreq <- freqs
plotScalingData$CommunitySeq <- asmbl
# Community size: number of species in the CSV community string.
plotScalingData$CommunitySize <-
  lengths(strsplit(plotScalingData$Communities, ','))
# For usage by the reader: 3D scatter of community size against pool sizes,
# coloured by dataset, with both pool-size axes on a log scale.
plotScaling <- plotly::plot_ly(
  plotScalingData,
  x = ~Basals,
  y = ~Consumers,
  z = ~CommunitySize,
  color = ~Dataset
) %>%
  plotly::add_markers() %>%
  plotly::layout(
    scene = list(
      xaxis = list(type = "log"),
      yaxis = list(type = "log")
    )
  )
plotScaling
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
# Display the table without the CommunitySeq list column.
dplyr::select(plotScalingData, -CommunitySeq)
Check whether the dataset should be part of the model. (It should not matter, as both datasets are generated by the same code, just with different seeds.)
# Log community size against log pool sizes, with the dataset as a predictor
# to check it has no effect.  (The formula text is echoed by summary().)
plotSamplingDataLM1 <- lm(
log(CommunitySize) ~ log(Basals) + log(Consumers) + Dataset,
data = plotScalingData
)
summary(plotSamplingDataLM1)
Call:
lm(formula = log(CommunitySize) ~ log(Basals) + log(Consumers) +
Dataset, data = plotScalingData)
Residuals:
Min 1Q Median 3Q Max
-0.83712 -0.17860 0.01358 0.20886 0.78899
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.91641 0.11947 7.670 5.05e-11 ***
log(Basals) 0.23818 0.02014 11.825 < 2e-16 ***
log(Consumers) 0.03652 0.01856 1.968 0.0528 .
Dataset2021-05 0.07433 0.07197 1.033 0.3050
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.3089 on 75 degrees of freedom
Multiple R-squared: 0.677, Adjusted R-squared: 0.664
F-statistic: 52.39 on 3 and 75 DF, p-value: < 2.2e-16
The dataset does not appear to have a statistically significant effect. Without formally comparing the models (which any publication would need to do), the model without the dataset effect is:
# Same log-log model as LM1 but without the (non-significant) dataset term.
plotSamplingDataLM2 <- lm(
log(CommunitySize) ~ log(Basals) + log(Consumers),
data = plotScalingData
)
summary(plotSamplingDataLM2)
Call:
lm(formula = log(CommunitySize) ~ log(Basals) + log(Consumers),
data = plotScalingData)
Residuals:
Min 1Q Median 3Q Max
-0.86732 -0.17301 -0.02186 0.20156 0.74221
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.93258 0.11850 7.870 1.95e-11 ***
log(Basals) 0.24312 0.01957 12.421 < 2e-16 ***
log(Consumers) 0.03800 0.01851 2.053 0.0435 *
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.309 on 76 degrees of freedom
Multiple R-squared: 0.6724, Adjusted R-squared: 0.6638
F-statistic: 77.99 on 2 and 76 DF, p-value: < 2.2e-16
Adding this to the plot:
# Overlay the LM2 prediction surface on the scatter plot.
# E.g. https://stackoverflow.com/a/38533046
plotSamplingDataLM2Res <- 3  # grid step, if a finer seq()-based grid is wanted
# Evaluate the surface at the observed pool sizes only.
plotSamplingDataLM2AxisX <- sort(unique(plotScalingData$Basals))
plotSamplingDataLM2AxisY <- sort(unique(plotScalingData$Consumers))
plotSamplingDataLM2Surf <- expand.grid(
  Basals = plotSamplingDataLM2AxisX,
  Consumers = plotSamplingDataLM2AxisY
)
# LM2 is fitted on the log scale, so back-transform the predictions.
plotSamplingDataLM2Surf$CommunitySize <- exp(predict.lm(
  plotSamplingDataLM2, newdata = plotSamplingDataLM2Surf
))
# Cast the long grid into the Basals x Consumers matrix plotly expects.
plotSamplingDataLM2Surf <- reshape2::acast(
  plotSamplingDataLM2Surf, Basals ~ Consumers, value.var = "CommunitySize"
)
plotly::add_trace(
  plotScaling,
  z = plotSamplingDataLM2Surf,
  x = plotSamplingDataLM2AxisX,
  y = plotSamplingDataLM2AxisY,
  type = "surface"
)
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
Not particularly convincing, but it does agree with general ideas (consumers less important than basals).
# Log-log model forced through the origin (the -1 removes the intercept).
plotSamplingDataLM3 <- lm(
log(CommunitySize) ~ log(Basals) + log(Consumers) - 1,
data = plotScalingData
)
summary(plotSamplingDataLM3)
Call:
lm(formula = log(CommunitySize) ~ log(Basals) + log(Consumers) -
1, data = plotScalingData)
Residuals:
Min 1Q Median 3Q Max
-0.80814 -0.13633 0.05056 0.40151 1.03117
Coefficients:
Estimate Std. Error t value Pr(>|t|)
log(Basals) 0.33969 0.02041 16.642 < 2e-16 ***
log(Consumers) 0.14852 0.01614 9.202 4.89e-14 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.4136 on 77 degrees of freedom
Multiple R-squared: 0.9598, Adjusted R-squared: 0.9587
F-statistic: 918.9 on 2 and 77 DF, p-value: < 2.2e-16
# Overlay the LM3 (no-intercept) prediction surface.
# E.g. https://stackoverflow.com/a/38533046
plotSamplingDataLM3Res <- 3  # grid step, if a finer seq()-based grid is wanted
# Evaluate the surface at the observed pool sizes only.
plotSamplingDataLM3AxisX <- sort(unique(plotScalingData$Basals))
plotSamplingDataLM3AxisY <- sort(unique(plotScalingData$Consumers))
plotSamplingDataLM3Surf <- expand.grid(
  Basals = plotSamplingDataLM3AxisX,
  Consumers = plotSamplingDataLM3AxisY
)
# LM3 is fitted on the log scale, so back-transform the predictions.
plotSamplingDataLM3Surf$CommunitySize <- exp(predict.lm(
  plotSamplingDataLM3, newdata = plotSamplingDataLM3Surf
))
# Cast the long grid into the Basals x Consumers matrix plotly expects.
plotSamplingDataLM3Surf <- reshape2::acast(
  plotSamplingDataLM3Surf, Basals ~ Consumers, value.var = "CommunitySize"
)
plotly::add_trace(
  plotScaling,
  z = plotSamplingDataLM3Surf,
  x = plotSamplingDataLM3AxisX,
  y = plotSamplingDataLM3AxisY,
  type = "surface"
)
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
# Community size on the raw (untransformed) scale against log pool sizes.
plotSamplingDataLM4 <- lm(
CommunitySize ~ log(Basals) + log(Consumers),
data = plotScalingData
)
summary(plotSamplingDataLM4)
Call:
lm(formula = CommunitySize ~ log(Basals) + log(Consumers), data = plotScalingData)
Residuals:
Min 1Q Median 3Q Max
-4.867 -1.628 -0.500 1.427 9.556
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.09911 0.98519 0.101 0.9201
log(Basals) 1.93666 0.16273 11.901 <2e-16 ***
log(Consumers) 0.26912 0.15390 1.749 0.0844 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 2.569 on 76 degrees of freedom
Multiple R-squared: 0.6525, Adjusted R-squared: 0.6433
F-statistic: 71.34 on 2 and 76 DF, p-value: < 2.2e-16
# Overlay the LM4 prediction surface.
# E.g. https://stackoverflow.com/a/38533046
plotSamplingDataLM4Res <- 3  # grid step, if a finer seq()-based grid is wanted
plotSamplingDataLM4AxisX <- sort(unique(plotScalingData$Basals))
plotSamplingDataLM4AxisY <- sort(unique(plotScalingData$Consumers))
# BUG FIX: the grid was built from the LM3 axis variables; use LM4's own
# (the values are identical today, but the copy-paste was fragile).
plotSamplingDataLM4Surf <- expand.grid(
  Basals = plotSamplingDataLM4AxisX,
  Consumers = plotSamplingDataLM4AxisY
)
# BUG FIX: LM4 models CommunitySize directly (no log transform of the
# response), so predict.lm() already returns sizes on the response scale --
# exponentiating them, as the copied LM2/LM3 code did, is wrong.
plotSamplingDataLM4Surf$CommunitySize <- predict.lm(
  plotSamplingDataLM4, newdata = plotSamplingDataLM4Surf
)
plotSamplingDataLM4Surf <- reshape2::acast( # Cast to array/matrix
  plotSamplingDataLM4Surf, Basals ~ Consumers, value.var = "CommunitySize"
)
plotScaling %>% plotly::add_trace(z = plotSamplingDataLM4Surf,
                                  x = plotSamplingDataLM4AxisX,
                                  y = plotSamplingDataLM4AxisY,
                                  type = "surface")
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
minimal value for n is 3, returning requested palette with 3 different levels
# > runif(1) * 1E8
# [1] 82598679
set.seed(82598679)
mats <- list()      # per-dataset lists of interaction matrices
poolsall <- list()  # name pools used in save data; be careful!
# seq_along() instead of 1:length() so an empty dirViking cannot loop.
for (i in seq_along(dirViking)) {
  # The first dataset's file has no numeric suffix; later ones append i.
  temp <- load(file.path(
    dirViking[i],
    paste0("LawMorton1996-NumericalPoolCommunityScaling-PoolMats",
           if (i > 1) i else "",
           ".RDS")
  ))
  # load() returns the restored object names; fetch them by name with get()
  # rather than eval(parse(...)).
  mats[[i]] <- get(temp[1])
  poolsall[[i]] <- get(temp[2])
}
# Carry the per-dataset pools forward under the name the save data used.
pools <- poolsall
# Restore previously computed candidate abundances; load() returns the
# name(s) of the restored object(s), which eval(parse(...)) then fetches.
# NOTE(review): assumes the .Rdata file holds a single object -- confirm.
oldCandidateData <- load(file.path(getwd(), "candidateDataSoFar.Rdata"))
oldCandidateData <- eval(parse(text = oldCandidateData))
# Keep only combinations that reached more than one distinct steady state:
# OtherSteadyStates counts the other end states within each CombnNum/Dataset
# group (the result stays grouped by CombnNum and Dataset).
candidateData <- plotScalingData %>% dplyr::group_by(
CombnNum, Dataset
) %>% dplyr::mutate(
OtherSteadyStates = dplyr::n() - 1
) %>% dplyr::filter(
OtherSteadyStates > 0
)
# Peek at the candidates (CommunitySeq is a list column; drop it for display).
candidateData %>% dplyr::select(-CommunitySeq)
# First, check if it is in the paramFrame.
# Second, check if it is in the saved data from the previous.
# Otherwise, ignore it, we'll figure out what it is and why it is missing later.
candidateData$CommunityAbund <- ""
for (r in 1:nrow(candidateData)) {
# ID 1:4 are used to identify paramFrame, 5 used to identify abundance
ID <- candidateData[r, 1:6]
# Locate the matching paramFrame row by combination, pool sizes and dataset.
paramFrameRow <- paramFrame %>% dplyr::filter(
CombnNum == ID$CombnNum,
Basals == ID$Basals,
Consumers == ID$Consumers,
Dataset == ID$Dataset
)
# Rows of the second dataset store abundances directly in EndStateAbundance.
if (is.list(paramFrameRow$EndStateAbundance[[1]])) {
entry <- which(ID$Communities == names(paramFrameRow$EndStateAbundance[[1]]))
if (length(entry)) {
candidateData$CommunityAbund[r] <- paramFrameRow$EndStateAbundance[[1]][[entry]]
next()
}
}
# Fall back to the previously saved candidate data for the 2021-04 dataset.
if (ID$Dataset == "2021-04") {
oldCandDatRow <- oldCandidateData %>% dplyr::filter(
CombnNum == ID$CombnNum,
Basals == ID$Basals,
Consumers == ID$Consumers,
Communities == ID$Communities
)
if (nrow(oldCandDatRow) > 0) {
if (oldCandDatRow$CommunityAbund != "") {
candidateData$CommunityAbund[r] <- oldCandDatRow$CommunityAbund
}
}
}
}
# Drop candidates with no usable abundance record.
candidateData <- candidateData %>% dplyr::filter(CommunityAbund != "",
CommunityAbund != "Failure")
# Productivity of each candidate community, via the project package.
candidateData$CommunityProd <- NA
# seq_len() instead of 1:nrow(): if the abundance filter above removed every
# row, 1:nrow() would iterate over c(1, 0) and fail.
for (r in seq_len(nrow(candidateData))) {
  candidateData$CommunityProd[r] <- with(
    candidateData[r, ],
    RMTRCode2::Productivity(
      Pool = pools[[DatasetID]][[CombnNum]],
      InteractionMatrix = mats[[DatasetID]][[CombnNum]],
      Community = Communities,
      Populations = CommunityAbund
    )
  )
}
# Simulate island dynamics for one pair of candidate communities.
#
# i:    length-2 index vector (a combn() column) selecting the pair in dat.
# dat:  candidate-community table (needs Communities and CommunityAbund).
# pool: species pool for this combination.
# mat:  interaction matrix for this combination.
# dmat: island-to-island dispersal matrix; its dimension sets the island
#       count, with any middle islands started empty ("").
islandFUN <- function(i, dat, pool, mat, dmat) {
  temp <- dat[i, ]
  RMTRCode2::IslandDynamics(
    Pool = pool,
    InteractionMatrix = mat,
    # Community 1 on the first island, community 2 on the last.
    Communities = c(
      list(temp$Communities[1]),
      rep("", nrow(dmat) - 2),
      temp$Communities[2]
    ),
    Populations = c(
      list(temp$CommunityAbund[1]),
      rep("", nrow(dmat) - 2),
      list(temp$CommunityAbund[2])
    ),
    DispersalPool = 0.0001,
    # BUG FIX: removed the trailing comma, which left an empty argument in
    # the IslandDynamics() call.
    DispersalIsland = dmat
  )
}
# For each group-dataset,
# For each pair,
# Run Island Dynamics,
# Save the result with its pairing
candidateData$TotalID <- paste(candidateData$CombnNum, candidateData$DatasetID)
islandInteractionsOneTwo <- list()
for (grp in unique(candidateData$TotalID)) {
  candidateDataSubset <- candidateData %>% dplyr::filter(TotalID == grp)
  # A single candidate community has no pair to interact with.
  if (nrow(candidateDataSubset) == 1) next()
  # Two islands facing each other: dispersal both ways, none to self.
  pairingResults <- combn(
    nrow(candidateDataSubset), 2,
    islandFUN,
    dat = candidateDataSubset,
    pool = pools[[
      candidateDataSubset$DatasetID[1]
    ]][[candidateDataSubset$CombnNum[1]]],
    mat = mats[[
      candidateDataSubset$DatasetID[1]
    ]][[candidateDataSubset$CombnNum[1]]],
    dmat = matrix(c(0, 1, 1, 0), nrow = 2, ncol = 2),
    simplify = FALSE
  )
  # Keep only the final time step and split it into per-island state vectors.
  # NOTE(review): assumes column 1 is time and the rest are island-blocked
  # species columns -- confirm against RMTRCode2::IslandDynamics.
  pairingResults <- lapply(
    pairingResults, function(mat, isles) {
      mat <- mat[nrow(mat), -1]
      retVal <- list()
      species <- length(mat) / isles
      # seq_len() instead of 1:isles for safety with zero islands.
      for (i in seq_len(isles)) {
        retVal[[i]] <- mat[((i - 1) * species + 1) : (i * species)]
      }
      retVal
    },
    isles = 2
  )
  islandInteractionsOneTwo[[grp]] <- pairingResults
}
# Re-run every pairing with an empty middle island: community 1 on island 1,
# island 2 empty, community 2 on island 3, dispersal only between neighbours.
islandInteractionsOneEmptyTwo <- list()
for (grp in unique(candidateData$TotalID)) {
  candidateDataSubset <- candidateData %>% dplyr::filter(TotalID == grp)
  if (nrow(candidateDataSubset) == 1) next()
  pairingResults <- combn(
    nrow(candidateDataSubset), 2,
    islandFUN,
    dat = candidateDataSubset,
    pool = pools[[
      candidateDataSubset$DatasetID[1]
    ]][[candidateDataSubset$CombnNum[1]]],
    mat = mats[[
      candidateDataSubset$DatasetID[1]
    ]][[candidateDataSubset$CombnNum[1]]],
    dmat = matrix(c(
      0, 1, 0, # Island 2 -> 1
      1, 0, 1, # Island 1 -> 2, Island 3 -> 2
      0, 1, 0  # Island 2 -> 3
    ), nrow = 3, ncol = 3, byrow = TRUE),
    simplify = FALSE
  )
  # Keep only the final time step and split it into per-island state vectors
  # (same post-processing as the two-island case above).
  pairingResults <- lapply(
    pairingResults, function(mat, isles) {
      mat <- mat[nrow(mat), -1]
      retVal <- list()
      species <- length(mat) / isles
      for (i in seq_len(isles)) {
        retVal[[i]] <- mat[((i - 1) * species + 1) : (i * species)]
      }
      retVal
    },
    isles = 3
  )
  islandInteractionsOneEmptyTwo[[grp]] <- pairingResults
}
# Format of table should be:
# ID, Community 1, Community 2, Outcomes 1-2, Outcomes 1-0-2
# For outcomes, species presence will be used.
communities <- NULL
totalCommunities <- NULL
for (grp in unique(candidateData$TotalID)) {
  candidateDataSubset <- candidateData %>% dplyr::filter(TotalID == grp)
  if (nrow(candidateDataSubset) > 1) {
    # All pairs of candidate communities, in the same order combn() used for
    # the island runs above.  BUG FIX: removed the trailing comma, which left
    # an empty argument in the combn() call.
    newCommunities <- combn(
      candidateDataSubset$Communities, 2
    )
    communities <- c(communities, newCommunities)
    # Sorted union of every species appearing in any pairing of this group;
    # used later to translate abundance indices back to species names.
    totalCommunities <- c(
      totalCommunities,
      toString(sort(unique(unlist(lapply(newCommunities,
                                         RMTRCode2::CsvRowSplit)))))
    )
  }
}
# Translate each island's final numeric state into the species present
# (abundance above 1E-6), named via the group's total species list.
# Structure: per group -> per pairing -> per island, flattened by unlist().
# NOTE(review): assumes totalCommunities is aligned with the groups of
# islandInteractionsOneTwo (both are built per group with > 1 candidate, in
# the same order) and that each state vector z is ordered like the sorted
# species list -- confirm.
islandInteractionsOneTwoWhich <- unlist(lapply(
seq_along(islandInteractionsOneTwo), function(i, x, tC) {
lapply(x[[i]], function(y, tC) {
lapply(y, function(z, tC) {
# z: one island's final abundances for this pairing.
toString(RMTRCode2::CsvRowSplit(tC)[which(z > 1E-6)])
}, tC = tC)
},
tC = tC[i])
},
x = islandInteractionsOneTwo,
tC = totalCommunities))
# Same species-presence translation for the three-island (empty middle) runs;
# here each pairing contributes three island states instead of two.
islandInteractionsOneEmptyTwoWhich <- unlist(lapply(
seq_along(islandInteractionsOneEmptyTwo), function(i, x, tC) {
lapply(x[[i]], function(y, tC) {
lapply(y, function(z, tC) {
# z: one island's final abundances for this pairing.
toString(RMTRCode2::CsvRowSplit(tC)[which(z > 1E-6)])
}, tC = tC)
},
tC = tC[i])
},
x = islandInteractionsOneEmptyTwo,
tC = totalCommunities))
# One row per pairing: the two source communities plus each island's final
# community under both dispersal topologies.
islandInteractionResults <- data.frame(
  DatasetID = rep(names(islandInteractionsOneTwo),
                  unlist(lapply(islandInteractionsOneTwo, length))),
  Community1 = communities[seq(from = 1, to = length(communities), by = 2)],
  Community2 = communities[seq(from = 2, to = length(communities), by = 2)],
  # The flattened Which vectors alternate islands within each pairing, so
  # offsets 1, 2 (and 3) pick out the respective islands.
  # BUG FIX: the original used `from = 1` for every column, so each
  # topology's Island2/Island3 columns merely repeated Island1's outcome.
  OutcomeWOEmpty_Island1 = islandInteractionsOneTwoWhich[
    seq(from = 1, to = length(islandInteractionsOneTwoWhich), by = 2)],
  OutcomeWOEmpty_Island2 = islandInteractionsOneTwoWhich[
    seq(from = 2, to = length(islandInteractionsOneTwoWhich), by = 2)],
  OutcomeWEmpty_Island1 = islandInteractionsOneEmptyTwoWhich[
    seq(from = 1, to = length(islandInteractionsOneEmptyTwoWhich), by = 3)],
  OutcomeWEmpty_Island2 = islandInteractionsOneEmptyTwoWhich[
    seq(from = 2, to = length(islandInteractionsOneEmptyTwoWhich), by = 3)],
  OutcomeWEmpty_Island3 = islandInteractionsOneEmptyTwoWhich[
    seq(from = 3, to = length(islandInteractionsOneEmptyTwoWhich), by = 3)]
)
# Show the raw outcome table, then summarise it into invasion indicators.
islandInteractionResults
islandInteractionResults %>%
  dplyr::mutate(
    # An island counts as invaded when its final community differs from the
    # community that was seeded there (community 2 sits on island 3 in the
    # empty-middle topology).
    C1WOInvaded = Community1 != OutcomeWOEmpty_Island1,
    C2WOInvaded = Community2 != OutcomeWOEmpty_Island2,
    C1WInvaded = Community1 != OutcomeWEmpty_Island1,
    C2WInvaded = Community2 != OutcomeWEmpty_Island3,
    # Stalemate: neither island was invaded; Hybrid: both were.
    StalemateWO = !C1WOInvaded & !C2WOInvaded,
    StalemateW = !C1WInvaded & !C2WInvaded,
    HybridWO = C1WOInvaded & C2WOInvaded,
    HybridW = C1WInvaded & C2WInvaded
  ) %>%
  dplyr::select(-dplyr::starts_with("Outcome"))